From: Tim Deegan
Date: Wed, 1 Sep 2010 10:23:48 +0000 (+0100)
Subject: x86 shadow: remove the assumption that multipage shadows are contiguous
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~11550
X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22Dat/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22Dat?a=commitdiff_plain;h=3117617710bcfc3ff93a65b4defc0fc1bf4e64a2;p=xen.git

x86 shadow: remove the assumption that multipage shadows are contiguous
and move from page to page using the linked list instead.

Signed-off-by: Tim Deegan
---

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3c5b6d3a08..2c5504c25d 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1214,8 +1214,8 @@ int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
  * l1 tables (covering 2MB of virtual address space each). Similarly, a
  * 32-bit guest l2 table (4GB va) needs to be shadowed by four
  * PAE/64-bit l2 tables (1GB va each). These multi-page shadows are
- * contiguous and aligned; functions for handling offsets into them are
- * defined in shadow.c (shadow_l1_index() etc.)
+ * not contiguous in memory; functions for handling offsets into them are
+ * defined in shadow/multi.c (shadow_l1_index() etc.)
  *
  * This table shows the allocation behaviour of the different modes:
  *
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index c6dc95f99e..59cd59417e 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -421,14 +421,28 @@ sh_guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
  * way to see this is: a 32-bit guest L2 page maps 4GB of virtual address
  * space, while a PAE- or 64-bit shadow L2 page maps 1GB of virtual address
  * space.)
- *
- * For PAE guests, for every 32-bytes of guest L3 page table, we use 64-bytes
- * of shadow (to store both the shadow, and the info that would normally be
- * stored in page_info fields). This arrangement allows the shadow and the
- * "page_info" fields to always be stored in the same page (in fact, in
- * the same cache line), avoiding an extra call to map_domain_page().
  */
 
+/* From one page of a multi-page shadow, find the next one */
+static inline mfn_t sh_next_page(mfn_t smfn)
+{
+    mfn_t next;
+    struct page_info *pg = mfn_to_page(smfn);
+
+    ASSERT(pg->u.sh.type == SH_type_l1_32_shadow
+           || pg->u.sh.type == SH_type_fl1_32_shadow
+           || pg->u.sh.type == SH_type_l2_32_shadow);
+    ASSERT(pg->u.sh.type == SH_type_l2_32_shadow || pg->u.sh.head);
+    ASSERT(pg->list.next != PAGE_LIST_NULL);
+
+    next = _mfn(pdx_to_pfn(pg->list.next));
+
+    /* XXX not for long */ ASSERT(mfn_x(next) == mfn_x(smfn) + 1);
+    ASSERT(mfn_to_page(next)->u.sh.type == pg->u.sh.type);
+    ASSERT(!mfn_to_page(next)->u.sh.head);
+    return next;
+}
+
 static inline u32
 guest_index(void *ptr)
 {
@@ -440,8 +454,8 @@ shadow_l1_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2)
     ASSERT(mfn_to_page(*smfn)->u.sh.head);
-    *smfn = _mfn(mfn_x(*smfn) +
-                 (guest_index / SHADOW_L1_PAGETABLE_ENTRIES));
+    if ( guest_index >= SHADOW_L1_PAGETABLE_ENTRIES )
+        *smfn = sh_next_page(*smfn);
     return (guest_index % SHADOW_L1_PAGETABLE_ENTRIES);
 #else
     return guest_index;
@@ -452,13 +466,12 @@ static u32
 shadow_l2_index(mfn_t *smfn, u32 guest_index)
 {
 #if (GUEST_PAGING_LEVELS == 2)
+    int i;
     ASSERT(mfn_to_page(*smfn)->u.sh.head);
     // Because we use 2 shadow l2 entries for each guest entry, the number of
     // guest entries per shadow page is SHADOW_L2_PAGETABLE_ENTRIES/2
-    //
-    *smfn = _mfn(mfn_x(*smfn) +
-                 (guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2)));
-
+    for ( i = 0; i < guest_index / (SHADOW_L2_PAGETABLE_ENTRIES / 2); i++ )
+        *smfn = sh_next_page(*smfn);
     // We multiply by two to get the index of the first of the two entries
     // used to shadow the specified guest entry.
     return (guest_index % (SHADOW_L2_PAGETABLE_ENTRIES / 2)) * 2;
@@ -1014,11 +1027,11 @@ static int shadow_set_l2e(struct vcpu *v,
     /* In 2-on-3 we work with pairs of l2es pointing at two-page
      * shadows.  Reference counting and up-pointers track from the first
      * page of the shadow to the first l2e, so make sure that we're
-     * working with those:
-     * Align the pointer down so it's pointing at the first of the pair */
+     * working with those:
+     * Start with a pair of identical entries */
+    shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
+    /* Align the pointer down so it's pointing at the first of the pair */
     sl2e = (shadow_l2e_t *)((unsigned long)sl2e & ~(sizeof(shadow_l2e_t)));
-    /* Align the mfn of the shadow entry too */
-    new_sl2e.l2 &= ~(1<<PAGE_SHIFT);

[...]

+        mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
         int i;
         for ( i = 0; i < 4; i++ )
         {
 #if GUEST_PAGING_LEVELS == 2
             /* 2-on-3: make a PAE l3 that points at the four-page l2 */
-            smfn = _mfn(pagetable_get_pfn(v->arch.shadow_table[0]) + i);
+            if ( i != 0 )
+                smfn = sh_next_page(smfn);
 #else
             /* 3-on-3: make a PAE l3 that points at the four l2 pages */
             smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
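
To make the shape of the change easier to see outside the Xen tree, here is a
minimal, self-contained sketch of the indexing scheme before and after this
patch.  It is illustration only: struct page_desc, page_of_shadow(), l1_index()
and ENTRIES_PER_PAGE are invented for the example and correspond only loosely
to Xen's struct page_info, sh_next_page() and shadow_l1_index().

    /* Illustration only -- not Xen code.  All names below are invented. */
    #include <assert.h>
    #include <stddef.h>

    #define ENTRIES_PER_PAGE 1024       /* e.g. a 32-bit l1: 1024 entries per page */

    struct page_desc {
        unsigned long mfn;              /* physical frame number of this page      */
        struct page_desc *next;         /* next page of the same shadow, or NULL   */
        int head;                       /* set only on the first page              */
    };

    /* Old scheme: page k of a multi-page shadow was assumed to sit at
     * base_mfn + k, i.e. the pages had to be physically contiguous. */
    static unsigned long old_page_of_shadow(unsigned long base_mfn, unsigned int k)
    {
        return base_mfn + k;
    }

    /* New scheme: hop along the per-shadow linked list instead, so the
     * pages can live anywhere in memory. */
    static struct page_desc *page_of_shadow(struct page_desc *head, unsigned int k)
    {
        struct page_desc *pg = head;

        assert(head->head);             /* must start from the head page */
        while ( k-- )
        {
            pg = pg->next;
            assert(pg != NULL);         /* a shadow always has all its pages */
        }
        return pg;
    }

    /* Rough analogue of shadow_l1_index(): turn a guest index into
     * (page of the shadow, index within that page). */
    static unsigned int l1_index(struct page_desc **pg, unsigned int guest_index)
    {
        *pg = page_of_shadow(*pg, guest_index / ENTRIES_PER_PAGE);
        return guest_index % ENTRIES_PER_PAGE;
    }

    int main(void)
    {
        struct page_desc p1 = { .mfn = 0x2345, .next = NULL, .head = 0 };
        struct page_desc p0 = { .mfn = 0x1000, .next = &p1,  .head = 1 };
        struct page_desc *pg = &p0;

        /* Entry 1500 of a two-page shadow lives at slot 476 of the second page. */
        unsigned int idx = l1_index(&pg, 1500);
        assert(pg == &p1 && idx == 1500 - ENTRIES_PER_PAGE);

        (void)old_page_of_shadow;
        return 0;
    }

In the patch proper the same split is visible: sh_next_page() performs the
single list hop (still asserting, "XXX not for long", that the next page is
mfn + 1), while shadow_l1_index() and shadow_l2_index() keep the divide/modulo
arithmetic that picks the page and the offset within it.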